* Stack of interrupts awaiting EOI on each CPU. These must be popped in
* order, as only the current highest-priority pending irq can be EOIed.
*/
-static struct {
+struct pending_eoi {
u8 vector; /* Vector awaiting EOI */
u8 ready; /* Ready for EOI now? */
-} pending_eoi[NR_CPUS][NR_VECTORS] __cacheline_aligned;
-#define pending_eoi_sp(cpu) (pending_eoi[cpu][NR_VECTORS-1].vector)
+};
+static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
+#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)
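
To make the stack discipline concrete, a minimal stand-alone sketch follows. Everything in it is invented for illustration (user-space asserts, printf standing in for the real EOI via desc->handler->end(), the example vectors); it is not the Xen API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NR_VECTORS 256

struct pending_eoi {
    uint8_t vector; /* vector awaiting EOI */
    uint8_t ready;  /* ready for EOI now? */
};

static struct pending_eoi peoi[NR_VECTORS];

/* As in the patch, the stack pointer lives in the top slot. */
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)

/* Push: a nested interrupt always has a higher vector, so vectors are
 * strictly increasing from the bottom of the stack to the top. */
static void push_pending(uint8_t vector)
{
    int sp = pending_eoi_sp(peoi);
    assert(sp == 0 || peoi[sp-1].vector < vector);
    assert(sp < NR_VECTORS-1);
    peoi[sp].vector = vector;
    peoi[sp].ready = 0;
    pending_eoi_sp(peoi) = sp + 1;
}

/* Pop: only the consecutive run of ready entries at the top may be
 * EOIed; a ready entry buried under a not-ready one must wait. */
static void flush_ready(void)
{
    int sp = pending_eoi_sp(peoi);
    while ( (--sp >= 0) && peoi[sp].ready )
        printf("EOI vector %#x\n", (unsigned)peoi[sp].vector);
    pending_eoi_sp(peoi) = sp + 1;
}

int main(void)
{
    push_pending(0x30);
    push_pending(0x40);  /* nested, higher-priority interrupt */
    peoi[0].ready = 1;   /* 0x30 is ready but buried under 0x40... */
    flush_ready();       /* ...so nothing can be EOIed yet */
    peoi[1].ready = 1;   /* now 0x40 is ready as well */
    flush_ready();       /* EOIs 0x40 then 0x30, in stack order */
    return 0;
}

Storing the stack pointer in slot NR_VECTORS-1 is safe because at most NR_VECTORS-1 interrupts can nest, an invariant the ASSERT(sp < (NR_VECTORS-1)) in the hunk below also enforces.
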
static void __do_IRQ_guest(int vector)
{
irq_desc_t *desc = &irq_desc[vector];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
struct domain *d;
- int i, sp, cpu = smp_processor_id();
+ int i, sp;
+ struct pending_eoi *peoi = this_cpu(pending_eoi);
if ( unlikely(action->nr_guests == 0) )
{
@@ ... @@
    if ( action->ack_type == ACKTYPE_EOI )
{
- sp = pending_eoi_sp(cpu);
- ASSERT((sp == 0) || (pending_eoi[cpu][sp-1].vector < vector));
+ sp = pending_eoi_sp(peoi);
+ ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
ASSERT(sp < (NR_VECTORS-1));
- pending_eoi[cpu][sp].vector = vector;
- pending_eoi[cpu][sp].ready = 0;
- pending_eoi_sp(cpu) = sp+1;
- cpu_set(cpu, action->cpu_eoi_map);
+ peoi[sp].vector = vector;
+ peoi[sp].ready = 0;
+ pending_eoi_sp(peoi) = sp+1;
+ cpu_set(smp_processor_id(), action->cpu_eoi_map);
}
for ( i = 0; i < action->nr_guests; i++ )
@@ ... @@
/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
- irq_desc_t *desc;
- int vector, sp, cpu = smp_processor_id();
+ struct pending_eoi *peoi = this_cpu(pending_eoi);
+ irq_desc_t *desc;
+ int vector, sp;
ASSERT(!local_irq_is_enabled());
- sp = pending_eoi_sp(cpu);
+ sp = pending_eoi_sp(peoi);
- while ( (--sp >= 0) && pending_eoi[cpu][sp].ready )
+ while ( (--sp >= 0) && peoi[sp].ready )
{
- vector = pending_eoi[cpu][sp].vector;
+ vector = peoi[sp].vector;
desc = &irq_desc[vector];
spin_lock(&desc->lock);
desc->handler->end(vector);
spin_unlock(&desc->lock);
}
- pending_eoi_sp(cpu) = sp+1;
+ pending_eoi_sp(peoi) = sp+1;
}
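
The same mechanical conversion repeats in each function that follows: drop the explicit [cpu] index into the flat table and take a pointer from this_cpu() instead. Schematically, with an invented struct foo (DEFINE_PER_CPU and this_cpu are the real Xen per-CPU primitives; the other names are hypothetical, and the two halves are before/after alternatives, not one translation unit):

/* Before: a flat table sized for every possible CPU. */
static struct foo foo_table[NR_CPUS][N_ENTRIES] __cacheline_aligned;

static struct foo *my_entries(void)
{
    return foo_table[smp_processor_id()];
}

/* After: each CPU's entries live in its own per-CPU data area. */
static DEFINE_PER_CPU(struct foo, foo_table[N_ENTRIES]);

static struct foo *my_entries(void)
{
    return this_cpu(foo_table);
}

Because a per-CPU area is already private to its CPU, the __cacheline_aligned annotation on the old array is no longer needed once the data moves there, which is why the patch drops it.
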
static void __set_eoi_ready(irq_desc_t *desc)
{
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- int vector, sp, cpu = smp_processor_id();
+ struct pending_eoi *peoi = this_cpu(pending_eoi);
+ int vector, sp;
vector = desc - irq_desc;
if ( !(desc->status & IRQ_GUEST) ||
(action->in_flight != 0) ||
- !cpu_test_and_clear(cpu, action->cpu_eoi_map) )
+ !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
return;
- sp = pending_eoi_sp(cpu);
+ sp = pending_eoi_sp(peoi);
do {
ASSERT(sp > 0);
- } while ( pending_eoi[cpu][--sp].vector != vector );
- ASSERT(!pending_eoi[cpu][sp].ready);
- pending_eoi[cpu][sp].ready = 1;
+ } while ( peoi[--sp].vector != vector );
+ ASSERT(!peoi[sp].ready);
+ peoi[sp].ready = 1;
}
/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
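
The wrapper this comment introduces does not appear in the hunks shown here. A plausible shape, composing __set_eoi_ready() with flush_ready_eoi() under the descriptor lock (a hedged reconstruction, not verbatim from the patch):

static void set_eoi_ready(void *data)
{
    irq_desc_t *desc = data;

    ASSERT(!local_irq_is_enabled());

    /* Mark this CPU's stack slot for the IRQ as ready... */
    spin_lock(&desc->lock);
    __set_eoi_ready(desc);
    spin_unlock(&desc->lock);

    /* ...then pop whatever has become flushable at the top. */
    flush_ready_eoi(NULL);
}

The void * parameter fits use as a cross-CPU callback (e.g. via on_selected_cpus()), so that each CPU in action->cpu_eoi_map marks and flushes its own stack.
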
@@ ... @@
static void flush_all_pending_eoi(void *unused)
{
irq_desc_t *desc;
irq_guest_action_t *action;
- int i, vector, sp, cpu = smp_processor_id();
+ struct pending_eoi *peoi = this_cpu(pending_eoi);
+ int i, vector, sp;
ASSERT(!local_irq_is_enabled());
- sp = pending_eoi_sp(cpu);
+ sp = pending_eoi_sp(peoi);
while ( --sp >= 0 )
{
- if ( pending_eoi[cpu][sp].ready )
+ if ( peoi[sp].ready )
continue;
- vector = pending_eoi[cpu][sp].vector;
+ vector = peoi[sp].vector;
desc = &irq_desc[vector];
spin_lock(&desc->lock);
action = (irq_guest_action_t *)desc->action;